In [1]:
import glob
import math
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import random
import sklearn.metrics as metrics

from tensorflow.keras import optimizers
from tensorflow.keras import backend
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import add, concatenate, Conv2D, Dense, Dropout, Flatten, Input, Lambda
from tensorflow.keras.layers import Activation, AveragePooling2D, BatchNormalization, MaxPooling2D, ZeroPadding2D
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import to_categorical


%matplotlib inline
In [2]:
                            # Set up 'ggplot' style
plt.style.use('ggplot')     # use 'classic' to fall back to the default style
# Move y-axis ticks/labels to the right-hand side and set the default font.
plt.rcParams.update({
    'ytick.right':      True,
    'ytick.labelright': True,
    'ytick.left':       False,
    'ytick.labelleft':  False,
    'font.family':      'Arial',
})
In [3]:
# where am i?
%pwd
Out[3]:
'C:\\Users\\david\\Documents\\ImageNet'
In [4]:
%ls
 Volume in drive C is Acer
 Volume Serial Number is F2E5-64E8

 Directory of C:\Users\david\Documents\ImageNet

09/16/2019  06:31 AM    <DIR>          .
09/16/2019  06:31 AM    <DIR>          ..
09/09/2019  01:02 AM                43 .gitattributes
08/22/2019  11:06 PM                26 .gitignore
09/15/2019  02:47 PM    <DIR>          .ipynb_checkpoints
09/14/2019  04:34 PM         1,216,519 Create_Train_Test_Set.ipynb
09/14/2019  03:53 PM    <DIR>          data
08/22/2019  11:09 PM           455,126 Download-ImageNet.html
09/09/2019  12:35 AM           288,923 Download-ImageNet.ipynb
09/03/2019  09:40 PM           367,769 Download-Pexels.html
09/09/2019  12:35 AM            94,549 Download-Pexels.ipynb
09/09/2019  01:02 AM        10,518,772 fgs-imgs.npz
09/08/2019  11:18 PM        41,976,052 fgs-imgs128.npz
09/08/2019  11:18 PM        23,611,636 fgs-imgs96.npz
09/14/2019  03:57 PM        49,130,740 fgsOpnImg-imgs96.npz
09/16/2019  06:28 AM            14,880 FlowerPower.csv
09/16/2019  06:07 AM        98,136,496 FlowerPower.hdf5
09/14/2019  03:06 PM       226,409,716 flr102-imgs96.npz
09/09/2019  01:02 AM        15,728,884 flr-imgs.npz
09/08/2019  11:18 PM        62,374,132 flr-imgs128.npz
09/08/2019  11:18 PM        35,085,556 flr-imgs96.npz
09/09/2019  01:02 AM        13,295,860 flrnonflr-test-imgs.npz
09/08/2019  11:18 PM        52,445,428 flrnonflr-test-imgs128.npz
09/08/2019  11:18 PM        29,500,660 flrnonflr-test-imgs96-0.8.npz
09/14/2019  04:13 PM       102,187,252 flrnonflr-test-imgs96-0.8+.npz
09/08/2019  11:18 PM        14,764,276 flrnonflr-test-imgs96-0.9.npz
09/09/2019  01:02 AM             8,900 flrnonflr-test-labels.npz
09/08/2019  11:18 PM             8,780 flrnonflr-test-labels128.npz
09/08/2019  11:18 PM             8,780 flrnonflr-test-labels96-0.8.npz
09/14/2019  07:39 PM            29,812 flrnonflr-test-labels96-0.8+.npz
09/08/2019  11:18 PM             4,516 flrnonflr-test-labels96-0.9.npz
09/09/2019  01:02 AM        53,133,556 flrnonflr-train-imgs.npz
09/08/2019  11:18 PM       209,584,372 flrnonflr-train-imgs128.npz
09/08/2019  11:18 PM       117,891,316 flrnonflr-train-imgs96-0.8.npz
09/14/2019  04:13 PM       408,748,276 flrnonflr-train-imgs96-0.8+.npz
09/08/2019  11:18 PM       132,627,700 flrnonflr-train-imgs96-0.9.npz
09/09/2019  01:02 AM            34,836 flrnonflr-train-labels.npz
09/08/2019  11:18 PM            34,356 flrnonflr-train-labels128.npz
09/08/2019  11:18 PM            34,356 flrnonflr-train-labels96-0.8.npz
09/14/2019  04:13 PM           118,516 flrnonflr-train-labels96-0.8+.npz
09/08/2019  11:18 PM            38,620 flrnonflr-train-labels96-0.9.npz
08/17/2019  11:53 AM           124,162 ImageNet-Flowers.txt
08/17/2019  03:54 PM            75,692 ImageNet-Fungus.txt
08/17/2019  03:57 PM            81,424 ImageNet-Rocks.txt
09/15/2019  09:58 PM            66,035 Inception-ResNet-v1 & v2.ipynb
09/15/2019  03:16 PM            58,343 Inception-v4.ipynb
09/14/2019  11:39 PM            26,103 model.pdf
09/14/2019  07:39 PM    <DIR>          npz
09/03/2019  09:40 PM           128,688 Pexels-Flowers.txt
09/03/2019  09:40 PM            28,575 Pexels-Umbrellas.txt
09/09/2019  01:02 AM        22,733,044 pxl_flr-imgs.npz
09/08/2019  11:18 PM        88,080,628 pxl_flr-imgs128.npz
09/08/2019  11:18 PM        49,545,460 pxl_flr-imgs96.npz
09/09/2019  01:02 AM         5,173,492 pxl_umb-imgs.npz
09/08/2019  11:18 PM        20,594,932 pxl_umb-imgs128.npz
09/08/2019  11:18 PM        11,584,756 pxl_umb-imgs96.npz
09/09/2019  01:02 AM        12,275,956 rck-imgs.npz
09/08/2019  11:18 PM        49,004,788 rck-imgs128.npz
09/08/2019  11:18 PM        27,565,300 rck-imgs96.npz
09/14/2019  04:01 PM    <DIR>          readings
08/22/2019  11:02 PM                44 README.md
09/14/2019  04:21 PM           417,457 Reshape_Resize_Images.ipynb
09/09/2019  12:48 AM         8,546,104 train_Neural_Network (Conv2D, 96-0.8).html
09/15/2019  10:09 PM         2,427,075 train_Neural_Network (InceptionResNetV2, 96-0.8, Added data, try13).html
09/15/2019  02:35 AM        12,032,935 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try10).html
09/15/2019  11:36 AM         2,387,331 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try11).html
09/15/2019  05:42 PM         2,291,568 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try12).html
09/16/2019  06:31 AM         5,790,782 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try14).html
09/14/2019  08:36 PM         7,071,416 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp + Added data, try9).html
09/11/2019  01:01 AM         4,494,650 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try6).html
09/11/2019  10:59 PM         6,116,768 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try7).html
09/12/2019  02:35 AM         5,851,809 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try8).html
09/09/2019  03:08 AM         3,900,219 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try3).html
09/09/2019  11:09 PM         6,528,529 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try4).html
09/10/2019  08:44 PM         6,636,754 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try5).html
09/09/2019  01:32 AM         6,583,279 train_Neural_Network (ResNetV1, 96-0.8, Dropout, try1).html
09/09/2019  02:40 AM         6,300,696 train_Neural_Network (ResNetV1, 96-0.8, Dropout, try2).html
09/09/2019  01:23 AM         6,446,135 train_Neural_Network (ResNetV1, 96-0.8, no Dropout, try1).html
09/16/2019  06:31 AM        16,052,591 train_Neural_Network.ipynb
09/14/2019  04:08 PM        88,003,828 umbOpnImg-imgs96.npz
09/14/2019  07:39 PM         2,094,090 VGG_Model_Setup.ipynb
09/14/2019  07:39 PM            17,772 VGG_Model_Train_Test.ipynb
              76 File(s)  2,187,049,197 bytes
               6 Dir(s)  85,549,551,616 bytes free
In [5]:
# Collect the image file paths per category.
# ImageNet-derived images are .jpg; Pexels-derived images are .jpeg.
flowers = glob.glob('./data/flr_*.jpg')
fungus = glob.glob('./data/fgs_*.jpg')
rocks = glob.glob('./data/rck_*.jpg')

pixel_flowers = glob.glob('./data/pxl_flower_*.jpeg')
pixel_umbrella = glob.glob('./data/pxl_umbrella_*.jpeg')

# Label every count: the original message had no label for the first value,
# so the output read ambiguously ("There are 1269, 1792 flower ...").
print("There are %s ImageNet flower, %s Pexels flower, %s fungus, %s rock and %s umbrella pictures"
      % (len(flowers), len(pixel_flowers), len(fungus), len(rocks), len(pixel_umbrella)))
There are 1269, 1792 flower, 856 fungus, 1007 rock and 420 umbrella pictures
In [6]:
# Randomly show 5 example images from the selected dataset
# (comment previously said 10, but the loop below draws 5 random picks;
# duplicates are possible since picks are independent).
from IPython.display import Image

# Switch the commented alternatives to browse a different category.
dataset = flowers #flowers #fungus #rocks

for i in range(0, 5):
    index = random.randint(0, len(dataset)-1)
    print("Showing:", dataset[index])

    img = mpimg.imread(dataset[index])
    imgplot = plt.imshow(img)
    plt.show()

#Image(dataset[index])
Showing: ./data\flr_01149.jpg
Showing: ./data\flr_01639.jpg
Showing: ./data\flr_00363.jpg
Showing: ./data\flr_01880.jpg
Showing: ./data\flr_01680.jpg

Extract the training and testing datasets

In [7]:
# Load the pre-built train/test arrays from compressed .npz archives.
# Each archive stores a single unnamed array under numpy's default key 'arr_0'.
# Per the shapes printed in the next cell, images are (N, 96, 96, 3) and
# labels are (N,). File names suggest a flower-vs-nonflower split with a
# 0.8 train ratio and added ("+") data — confirm against the notebook that
# created the archives (Create_Train_Test_Set.ipynb).
trDatOrg       = np.load('flrnonflr-train-imgs96-0.8+.npz')['arr_0']
trLblOrg       = np.load('flrnonflr-train-labels96-0.8+.npz')['arr_0']
tsDatOrg       = np.load('flrnonflr-test-imgs96-0.8+.npz')['arr_0']
tsLblOrg       = np.load('flrnonflr-test-labels96-0.8+.npz')['arr_0']
In [8]:
# Sanity-check the loaded arrays: image tensors should be 4-D
# (samples, rows, cols, channels) and labels 1-D with matching sample counts.
print("For the training and test datasets:")
print("The shapes are %s, %s, %s, %s" \
      %(trDatOrg.shape, trLblOrg.shape, tsDatOrg.shape, tsLblOrg.shape))
For the training and test datasets:
The shapes are (14784, 96, 96, 3), (14784,), (3696, 96, 96, 3), (3696,)
In [9]:
# Randomly show 20 examples of the test images with their labels
# (comment previously said 10, but the loop draws 20).
# Labels print as 1.0/0.0 — presumably flower vs. non-flower; confirm
# against the notebook that built the .npz files.

data = tsDatOrg
label = tsLblOrg

for i in range(20):
    index = random.randint(0, len(data)-1)
    print("Showing %s index image, It is %s" %(index, label[index]))
    imgplot = plt.imshow(data[index])
    plt.show()
Showing 2132 index image, It is 1.0
Showing 2446 index image, It is 0.0
Showing 2733 index image, It is 0.0
Showing 2795 index image, It is 0.0
Showing 2361 index image, It is 0.0
Showing 1673 index image, It is 1.0
Showing 1502 index image, It is 1.0
Showing 3005 index image, It is 0.0
Showing 2014 index image, It is 1.0
Showing 2768 index image, It is 0.0
Showing 3574 index image, It is 0.0
Showing 746 index image, It is 1.0
Showing 603 index image, It is 1.0
Showing 1780 index image, It is 1.0
Showing 1891 index image, It is 1.0
Showing 1085 index image, It is 1.0
Showing 2683 index image, It is 0.0
Showing 3330 index image, It is 0.0
Showing 1202 index image, It is 1.0
Showing 357 index image, It is 1.0
In [10]:
# Convert the image data into 'float32'
# Rescale the pixel values from 0~255 to 0~1
trDat       = trDatOrg.astype('float32')/255
tsDat       = tsDatOrg.astype('float32')/255

# Retrieve the row size of each image
# Retrieve the column size of each image
imgrows     = trDat.shape[1]
imgclms     = trDat.shape[2]
channel     = 3

# # reshape the data to be [samples][width][height][channel]
# # This is required by Keras framework
# # (left disabled: the arrays loaded above are already 4-D
# #  (samples, 96, 96, 3), so this reshape would be a no-op)
# trDat       = trDat.reshape(trDat.shape[0], imgrows, imgclms, channel)
# tsDat       = tsDat.reshape(tsDat.shape[0], imgrows, imgclms, channel)

# Perform one hot encoding on the labels (0.0/1.0 -> two-column one-hot)
# Retrieve the number of classes in this problem from the one-hot width
trLbl       = to_categorical(trLblOrg)
tsLbl       = to_categorical(tsLblOrg)
num_classes = tsLbl.shape[1]
In [11]:
# fix random seed for reproducibility
seed = 29
np.random.seed(seed)


# Base name used for the checkpoint (.hdf5) and log (.csv) files.
modelname = 'FlowerPower'

# Use the `learning_rate` keyword: `lr` is a deprecated alias in tf.keras
# optimizers and triggers a deprecation warning on newer TF releases.
#optmz = optimizers.Adam(learning_rate=0.001)
optmz = optimizers.RMSprop(learning_rate=0.001)
In [12]:
# Baseline Model -> func: createBaselineModel()

def createBaselineModel():
    """Small plain-CNN baseline: two conv/pool stages, dropout, then a
    dense softmax head over `num_classes` classes.

    Note: compiled with the string 'adam', not the notebook-level `optmz`
    object used by the other model builders.
    """
    inputs = Input(shape=(imgrows, imgclms, channel))

    net = Conv2D(30, (4, 4), activation='relu')(inputs)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = Conv2D(50, (4, 4), activation='relu')(net)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = Dropout(0.3)(net)

    net = Flatten()(net)
    net = Dense(32, activation='relu')(net)
    net = Dense(num_classes, activation='softmax')(net)

    model = Model(inputs=[inputs], outputs=net)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
In [13]:
# ResNetV1 -> func: createResNetV1()
def resLyr(inputs,
           numFilters=16,
           kernelSz=3,
           strides=1,
           activation='relu',
           batchNorm=True,
           convFirst=True,
           lyrName=None):
    """One ResNet sub-layer: a Conv2D plus optional BatchNorm/activation.

    With convFirst=True the order is conv -> BN -> activation; otherwise
    BN -> activation -> conv (pre-activation form). When `lyrName` is
    given, layer names become '<lyrName>_conv', '<lyrName>_bn' and
    '<lyrName>_<activation>'; otherwise Keras auto-names are used.
    """
    def named(suffix):
        # Derive a layer name from lyrName, or None when unnamed.
        return lyrName + '_' + suffix if lyrName else None

    convLyr = Conv2D(numFilters,
                     kernel_size=kernelSz,
                     strides=strides,
                     padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(1e-4),
                     name=named('conv'))

    x = inputs
    if convFirst:
        x = convLyr(x)
        if batchNorm:
            x = BatchNormalization(name=named('bn'))(x)
        if activation is not None:
            x = Activation(activation, name=named(activation))(x)
    else:
        if batchNorm:
            x = BatchNormalization(name=named('bn'))(x)
        if activation is not None:
            x = Activation(activation, name=named(activation))(x)
        x = convLyr(x)
    return x


def resBlkV1(inputs,
             numFilters=16,
             numBlocks=3,
             downsampleOnFirst=True,
             names=None):
    """Stack of `numBlocks` ResNet-v1 residual blocks.

    Each block is two resLyr convolutions plus a shortcut (identity, or a
    1x1 linear projection when the first block downsamples), merged with
    an elementwise add and a ReLU.
    """
    def tag(blk, suffix):
        # '<names>_Blk<k>_<suffix>' when names is given, else None.
        return names + '_Blk' + blk + '_' + suffix if names else None

    x = inputs
    for run in range(numBlocks):
        blkStr = str(run + 1)
        firstAndDown = downsampleOnFirst and run == 0
        strides = 2 if firstAndDown else 1

        y = resLyr(inputs=x, numFilters=numFilters, strides=strides,
                   lyrName=tag(blkStr, 'Res1'))
        y = resLyr(inputs=y, numFilters=numFilters, activation=None,
                   lyrName=tag(blkStr, 'Res2'))

        if firstAndDown:
            # Project the shortcut so its shape matches the downsampled
            # main path before the add.
            x = resLyr(inputs=x, numFilters=numFilters, kernelSz=1,
                       strides=strides, activation=None, batchNorm=False,
                       lyrName=tag(blkStr, 'lin'))

        x = add([x, y], name=tag(blkStr, 'add'))
        x = Activation('relu', name=tag(blkStr, 'relu'))(x)
    return x

def createResNetV1(inputShape=(imgrows, imgclms, channel),
                   numClasses=2):
    """Build and compile a ResNet-v1 style classifier.

    Six residual stages (3 blocks each) with growing filter counts and a
    dropout layer after each stage, then average pooling and a softmax
    head. Compiled with the notebook-level `optmz` optimizer.
    """
    # (numFilters, downsampleOnFirst, stageName, dropoutRate) per stage,
    # applied in order — identical to the original hand-unrolled sequence.
    stages = [
        (16,  False, 'Stg1', 0.30),
        (32,  True,  'Stg2', 0.40),
        (64,  True,  'Stg3', 0.50),
        (128, True,  'Stg4', 0.50),
        (128, False, 'Stg5', 0.50),
        (256, True,  'Stg6', 0.50),
    ]

    inputs = Input(shape=inputShape)
    v = resLyr(inputs, lyrName='Inpt')
    for filters, downsample, stageName, rate in stages:
        v = resBlkV1(inputs=v, numFilters=filters, numBlocks=3,
                     downsampleOnFirst=downsample, names=stageName)
        v = Dropout(rate)(v)

    v = AveragePooling2D(pool_size=6, name='AvgPool')(v)
    v = Flatten()(v)
    outputs = Dense(numClasses, activation='softmax',
                    kernel_initializer='he_normal')(v)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optmz,
                  metrics=['accuracy'])
    return model
In [14]:
# Mostly Original # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    # Inception-A: four parallel branches concatenated on the channel axis:
    #   avg-pool -> 1x1 | 1x1 | 1x1 -> 3x3 | 1x1 -> 3x3 -> 3x3
    # NOTE(review): AveragePooling2D with pool_size=(1, 1) (stride defaults
    # to the pool size) is effectively an identity op — confirm a (3, 3)
    # pool with stride 1 wasn't intended. `names` is accepted but unused.
    x = inputs

    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)

    x_ML1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)

    x_MR1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)

    x_ER1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    # Inception-B: four parallel branches concatenated on the channel axis:
    #   avg-pool -> 1x1 | 1x1 | 1x1 -> 1x7 -> 1x7 | 1x1 -> 1x7 -> 7x1 -> 1x7 -> 7x1
    # NOTE(review): x_MR1_3 applies kernel_size=(1, 7) directly after another
    # (1, 7); the Inception-v4 paper's block-B middle branch alternates
    # 1x7 then 7x1 — confirm this is intended. `names` is accepted but unused.
    x = inputs

    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)

    x_ML1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)

    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)

    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=192, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=224, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=256, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    # Inception-C: branches whose heads are split into parallel 1x3/3x1
    # convs; six tensors are concatenated on the channel axis.
    # `names` is accepted but never used.
    x = inputs

    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)

    x_ML1_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)

    # Middle-right branch: 1x1, then a split 1x3 / 3x1 pair.
    x_MR1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)

    # End-right branch: 1x1 -> 1x3 -> 3x1, then a split 3x1 / 1x3 pair.
    x_ER1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=448, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=512, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A block: halves the spatial resolution via three parallel
    stride-2 branches (max-pool | 3x3 conv | 1x1->3x3->3x3 convs) that are
    concatenated on the channel axis.

    The branch filter counts (k, l, m, n) depend on the network variant.
    `names` is accepted but never used.

    Raises:
        ValueError: if `network_selected` is not one of the three known
            variants. (Previously an unknown value fell through and
            crashed later with an opaque NameError on `k`.)
    """
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
    else:
        raise ValueError("Unknown network_selected: %r" % (network_selected,))

    x = inputs

    # Branch 1: stride-2 max pooling.
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 2: single stride-2 3x3 conv.
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 3: 1x1 -> 3x3 -> stride-2 3x3.
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)

    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):
    """Reduction-B block: three parallel stride-2 branches concatenated on
    the channel axis to halve the spatial resolution. `names` is unused."""
    x = inputs

    # Branch 1: stride-2 max pooling.
    branch_pool = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 2: 1x1 conv then a stride-2 3x3 conv.
    branch_mid = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    branch_mid = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(branch_mid)

    # Branch 3: 1x1 -> 1x7 -> 7x1 -> stride-2 3x3.
    branch_right = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    branch_right = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(branch_right)
    branch_right = Conv2D(filters=320, kernel_size=(7, 1), strides=1, padding='same')(branch_right)
    branch_right = Conv2D(filters=320, kernel_size=(3, 3), strides=2, padding='valid')(branch_right)

    return concatenate([branch_pool, branch_mid, branch_right])

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    """Build and compile the (mostly original) Inception-v4 classifier:
    stem, 4x A blocks, reduction-A, 7x B blocks, reduction-B, 3x C blocks,
    then average pooling, a 1536-unit dense layer, dropout and a softmax
    head. Compiled with the string 'Adam' optimizer."""
    NETWORK_SELECTED = "Inception-v4"

    inputs = Input(shape=inputShape)
    x = stem_block(inputs)

    for _ in range(4):
        x = inception_a_block(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)

    for _ in range(7):
        x = inception_b_block(x)
    x = reduction_b_block(x)

    for _ in range(3):
        x = inception_c_block(x)

    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1536)(x) # Changed
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax',
                    kernel_initializer='he_normal')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer="Adam",
                  metrics=['accuracy'])
    return model
In [15]:
# Modified2 # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    # Redefinition of stem_block from the cell above (body identical);
    # running this cell shadows the earlier version. See that cell for the
    # stage-by-stage commentary. `names` is accepted but never used;
    # `x = inputs` is dead (overwritten on the next line).
    x = inputs

    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])

    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])

    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    # Redefinition of inception_a_block from the cell above (body
    # identical); running this cell shadows the earlier version.
    # Four parallel branches concatenated on the channel axis.
    x = inputs

    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)

    x_ML1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)

    x_MR1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)

    x_ER1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    # Redefinition of inception_b_block from the cell above (body
    # identical); running this cell shadows the earlier version.
    # NOTE(review): x_MR1_3 repeats kernel_size=(1, 7); the paper's block-B
    # middle branch alternates 1x7 then 7x1 — confirm this is intended.
    x = inputs

    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)

    x_ML1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)

    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)

    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=192, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=224, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=256, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    # Redefinition of inception_c_block from the cell above (body
    # identical); running this cell shadows the earlier version.
    # Branches with split 1x3/3x1 heads; six tensors concatenated.
    x = inputs

    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)

    x_ML1_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)

    x_MR1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)

    x_ER1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=448, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=512, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A block: halves the spatial resolution via three parallel
    stride-2 branches (max-pool | 3x3 conv | 1x1->3x3->3x3 convs) that are
    concatenated on the channel axis.

    The branch filter counts (k, l, m, n) depend on the network variant.
    `names` is accepted but never used.

    Raises:
        ValueError: if `network_selected` is not one of the three known
            variants. (Previously an unknown value fell through and
            crashed later with an opaque NameError on `k`.)
    """
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
    else:
        raise ValueError("Unknown network_selected: %r" % (network_selected,))

    x = inputs

    # Branch 1: stride-2 max pooling.
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 2: single stride-2 3x3 conv.
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 3: 1x1 -> 3x3 -> stride-2 3x3.
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)

    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):
    # Redefinition of reduction_b_block from the cell above (body
    # identical); running this cell shadows the earlier version.
    # Three parallel stride-2 branches concatenated on the channel axis.
    # `names` is accepted but never used.

    x = inputs

    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    x_M_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x_M_1)

    x_R_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=320, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    x_R_4 = Conv2D(filters=320, kernel_size=(3, 3), strides=2, padding='valid')(x_R_3)

    x = concatenate([x_L_1, x_M_2, x_R_4])
    return x

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    """Build and compile the modified Inception-v4 classifier with a
    BatchNormalization layer inserted before every Inception/reduction
    block: stem, 4x (BN + A), BN + reduction-A, 7x (BN + B),
    BN + reduction-B, 3x (BN + C), then average pooling, a 1536-unit dense
    layer, dropout and a softmax head. Compiled with 'Adam'."""
    NETWORK_SELECTED = "Inception-v4"

    inputs = Input(shape=inputShape)
    x = stem_block(inputs)

    for _ in range(4):
        x = BatchNormalization()(x)
        x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)

    for _ in range(7):
        x = BatchNormalization()(x)
        x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = reduction_b_block(x)

    for _ in range(3):
        x = BatchNormalization()(x)
        x = inception_c_block(x)

    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1536)(x) # Changed
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax',
                    kernel_initializer='he_normal')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer="Adam",
                  metrics=['accuracy'])
    return model
In [16]:
# Modified #(halfed) # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    # Halved-filter variant of the Inception-v4 stem (filter counts are
    # half of the cells above: 16/16/32/48/32/48/96 vs 32/32/64/96/64/96/192);
    # running this cell shadows the earlier definitions.
    # `names` is accepted but never used; `x = inputs` is dead.
    x = inputs

    x = Conv2D(filters=16, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=48, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])

    x_L2_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=32, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=32, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])

    x_L3_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    # Halved-filter variant of inception_a_block (48/32 filters vs 96/64
    # in the cells above); running this cell shadows the earlier
    # definitions. Four parallel branches concatenated on the channel axis.
    x = inputs

    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=48, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)

    x_ML1_1 = Conv2D(filters=48, kernel_size=(1, 1), strides=1, padding='same')(x)

    x_MR1_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)

    x_ER1_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    """Inception-B block (scaled-down Inception-v4): four parallel
    branches using factorised 7x7 convolutions, concatenated channel-wise.

    Args:
        inputs: 4-D feature-map tensor.
        names: unused; kept for interface compatibility.

    Returns:
        Channel-wise concatenation of the four branch outputs
        (spatial size unchanged).
    """
    # Branch 1: 1x1 average pool plus 1x1 projection.
    pool_branch = AveragePooling2D(pool_size=(1, 1), padding='same')(inputs)
    pool_branch = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(pool_branch)

    # Branch 2: plain 1x1 convolution.
    conv1x1_branch = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(inputs)

    # Branch 3: 1x1 then two 1x7 convolutions.
    # NOTE(review): the Inception-v4 paper uses 1x7 followed by 7x1 here;
    # this code applies 1x7 twice — confirm that is intentional.
    fact7_branch = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    fact7_branch = Conv2D(filters=112, kernel_size=(1, 7), strides=1, padding='same')(fact7_branch)
    fact7_branch = Conv2D(filters=128, kernel_size=(1, 7), strides=1, padding='same')(fact7_branch)

    # Branch 4: 1x1 then alternating 1x7 / 7x1 convolutions (deep branch).
    double_fact7_branch = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    double_fact7_branch = Conv2D(filters=96, kernel_size=(1, 7), strides=1, padding='same')(double_fact7_branch)
    double_fact7_branch = Conv2D(filters=112, kernel_size=(7, 1), strides=1, padding='same')(double_fact7_branch)
    double_fact7_branch = Conv2D(filters=112, kernel_size=(1, 7), strides=1, padding='same')(double_fact7_branch)
    double_fact7_branch = Conv2D(filters=128, kernel_size=(7, 1), strides=1, padding='same')(double_fact7_branch)

    return concatenate([pool_branch, conv1x1_branch, fact7_branch, double_fact7_branch])

def inception_c_block(inputs,
         names=None):
    """Inception-C block (scaled-down Inception-v4): branches with
    split 1x3 / 3x1 convolutions; six outputs concatenated channel-wise.

    Args:
        inputs: 4-D feature-map tensor.
        names: unused; kept for interface compatibility.

    Returns:
        Channel-wise concatenation of the six branch outputs
        (spatial size unchanged).
    """
    # Branch 1: 1x1 average pool plus 1x1 projection.
    pool_branch = AveragePooling2D(pool_size=(1, 1), padding='same')(inputs)
    pool_branch = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(pool_branch)

    # Branch 2: plain 1x1 convolution.
    conv1x1_branch = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(inputs)

    # Branch 3: 1x1 bottleneck that forks into parallel 1x3 and 3x1 convs.
    fork = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    fork_1x3 = Conv2D(filters=128, kernel_size=(1, 3), strides=1, padding='same')(fork)
    fork_3x1 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(fork)

    # Branch 4: 1x1 -> 1x3 -> 3x1 stack, then forks into 3x1 and 1x3.
    deep = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    deep = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(deep)
    deep = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(deep)
    deep_3x1 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(deep)
    deep_1x3 = Conv2D(filters=128, kernel_size=(1, 3), strides=1, padding='same')(deep)

    return concatenate([pool_branch, conv1x1_branch, fork_1x3, fork_3x1, deep_3x1, deep_1x3])

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A block (scaled-down filter counts): halves the spatial
    resolution via three parallel stride-2 branches.

    Args:
        inputs: 4-D feature-map tensor.
        network_selected: one of "Inception-v4", "Inception-ResNet-v1",
            "Inception-ResNet-v2"; selects the (k, l, m, n) filter counts.
        names: unused; kept for interface compatibility.

    Returns:
        Channel-wise concatenation of the three stride-2 branch outputs.

    Raises:
        ValueError: if ``network_selected`` is not a recognised variant.
            (The original if/elif chain left k, l, m, n unbound in that
            case and crashed later with an obscure NameError.)
    """
    # (k, l, m, n) filter counts per network variant (scaled-down values).
    filter_table = {
        "Inception-v4":        (96, 112, 128, 192),
        "Inception-ResNet-v1": (96, 96, 128, 192),
        "Inception-ResNet-v2": (128, 128, 192, 192),
    }
    try:
        k, l, m, n = filter_table[network_selected]
    except KeyError:
        raise ValueError("Unknown network_selected: %r (expected one of %s)"
                         % (network_selected, sorted(filter_table)))

    x = inputs

    # Branch 1: parameter-free stride-2 max-pool.
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 2: single stride-2 3x3 convolution.
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 3: 1x1 bottleneck, 3x3, then stride-2 3x3.
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)

    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):
    """Reduction-B block (scaled-down Inception-v4): halves the spatial
    resolution via three parallel stride-2 branches.

    Args:
        inputs: 4-D feature-map tensor.
        names: unused; kept for interface compatibility.

    Returns:
        Channel-wise concatenation of the three stride-2 branch outputs.
    """
    # Branch 1: parameter-free stride-2 max-pool.
    pool_branch = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(inputs)

    # Branch 2: 1x1 bottleneck then stride-2 3x3.
    conv_branch = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    conv_branch = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(conv_branch)

    # Branch 3: 1x1, factorised 7x7 (1x7 then 7x1), then stride-2 3x3.
    fact_branch = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    fact_branch = Conv2D(filters=128, kernel_size=(1, 7), strides=1, padding='same')(fact_branch)
    fact_branch = Conv2D(filters=160, kernel_size=(7, 1), strides=1, padding='same')(fact_branch)
    fact_branch = Conv2D(filters=160, kernel_size=(3, 3), strides=2, padding='valid')(fact_branch)

    return concatenate([pool_branch, conv_branch, fact_branch])

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    """Build and compile a scaled-down Inception-v4 classifier.

    Structure: stem, 4 x (BatchNorm + Inception-A), Reduction-A,
    7 x (BatchNorm + Inception-B), Reduction-B,
    3 x (BatchNorm + Inception-C), then the dense classification head.

    NOTE(review): `stem_block` and `reduction_a_block` are redefined in
    later notebook cells; Python resolves them when this function is
    CALLED, so the most recent definitions are the ones actually used.

    Args:
        inputShape: (rows, cols, channels) of the input images; defaults
            read the notebook-level imgrows/imgclms/channel globals.
        num_classes: number of softmax output classes.

    Returns:
        A compiled tf.keras Model (categorical cross-entropy, the
        notebook-level `optmz` optimizer, accuracy metric).
    """
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)

    # Stage A: four Inception-A blocks, each preceded by batch norm.
    for _ in range(4):
        x = BatchNormalization()(x)
        x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)

    # Stage B: seven Inception-B blocks.
    for _ in range(7):
        x = BatchNormalization()(x)
        x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = reduction_b_block(x)

    # Stage C: three Inception-C blocks.
    for _ in range(3):
        x = BatchNormalization()(x)
        x = inception_c_block(x)
    x = BatchNormalization()(x)

    # Classification head.
    x = AveragePooling2D(pool_size=(1, 1))(x)  # (1,1) avoids negative dimension at this input size
    x = Flatten()(x)
    x = Dense(256)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax',
                    kernel_initializer='he_normal')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optmz,
                  metrics=['accuracy'])
    return model
In [17]:
# Mostly Original # Inception-Res-v2 -> func: create_inception_resnet_v2()
def stem_block(inputs,
         names=None):
    """Stem of the Inception-ResNet-v2 network: an initial convolution
    stack followed by two branch-and-concatenate reductions.

    Note: this redefines the `stem_block` from an earlier cell and
    shadows it for any later call.

    Args:
        inputs: 4-D image tensor.
        names: unused; kept for interface compatibility.

    Returns:
        Downsampled feature-map tensor.
    """
    # Initial stride-2 convolution stack.
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)

    # First reduction: stride-2 max-pool alongside a stride-2 conv.
    pool_a = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    conv_a = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([pool_a, conv_a])

    # Second stage: short 1x1->3x3 branch vs factorised-7x7 branch.
    short_b = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    short_b = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(short_b)

    long_b = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    long_b = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(long_b)
    long_b = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(long_b)
    long_b = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(long_b)
    x = concatenate([short_b, long_b])

    # Final reduction: stride-2 conv (zero-padded to match) vs max-pool.
    conv_c = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    conv_c = ZeroPadding2D(padding=((0, 1), (0, 1)))(conv_c)  # size-mismatch fix
    pool_c = MaxPooling2D(strides=2, padding='valid')(x)
    return concatenate([conv_c, pool_c])

def inception_resnet_v2_a_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 'A' residual block.

    Three branches are concatenated, projected to 384 channels with a
    linear 1x1 conv, scaled by `scale`, added to the input (residual
    connection), then passed through ReLU. The residual add requires
    `inputs` to carry 384 channels.

    Args:
        inputs: 4-D feature-map tensor.
        scale: multiplier applied to the residual before the addition.
        names: unused; kept for interface compatibility.

    Returns:
        Tensor of the same shape as `inputs`.
    """
    # Branch 1: single 1x1 conv.
    branch_0 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(inputs)

    # Branch 2: 1x1 then 3x3.
    branch_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_1 = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(branch_1)

    # Branch 3: 1x1 then two stacked 3x3 convolutions.
    branch_2 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(branch_2)
    branch_2 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(branch_2)

    # Concatenate and project (linear activation: scaling happens before ReLU).
    mixed = concatenate([branch_0, branch_1, branch_2])
    up = Conv2D(filters=384, kernel_size=(1, 1), padding='same', activation='linear')(mixed)

    # Residual addition: inputs + scale * up, then ReLU.
    out = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                 output_shape=backend.int_shape(inputs)[1:],
                 arguments={'scale': scale})([inputs, up])
    return Activation(activation='relu')(out)

def inception_resnet_v2_b_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 'B' residual block.

    Two branches are concatenated, projected to 1152 channels with a
    linear 1x1 conv, scaled by `scale`, added to the input, then ReLU.
    The residual add requires `inputs` to carry 1152 channels.

    Args:
        inputs: 4-D feature-map tensor.
        scale: multiplier applied to the residual before the addition.
        names: unused; kept for interface compatibility.

    Returns:
        Tensor of the same shape as `inputs`.
    """
    # Branch 1: single 1x1 conv.
    branch_0 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(inputs)

    # Branch 2: 1x1 then factorised 7x7 (1x7 followed by 7x1).
    branch_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_1 = Conv2D(filters=160, kernel_size=(1, 7), strides=1, padding='same')(branch_1)
    branch_1 = Conv2D(filters=192, kernel_size=(7, 1), strides=1, padding='same')(branch_1)

    # Concatenate and project (linear activation: scaling happens before ReLU).
    mixed = concatenate([branch_0, branch_1])
    up = Conv2D(filters=1152, kernel_size=(1, 1), padding='same', activation='linear')(mixed)

    # Residual addition: inputs + scale * up, then ReLU.
    out = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                 output_shape=backend.int_shape(inputs)[1:],
                 arguments={'scale': scale})([inputs, up])
    return Activation(activation='relu')(out)

def inception_resnet_v2_c_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 'C' residual block.

    Two branches are concatenated, projected to 2048 channels with a
    linear 1x1 conv, scaled by `scale`, added to the input, then ReLU.
    The residual add requires `inputs` to carry 2048 channels.

    Args:
        inputs: 4-D feature-map tensor.
        scale: multiplier applied to the residual before the addition.
        names: unused; kept for interface compatibility.

    Returns:
        Tensor of the same shape as `inputs`.
    """
    # Branch 1: single 1x1 conv.
    branch_0 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(inputs)

    # Branch 2: 1x1 then split 3x3 (1x3 followed by 3x1).
    branch_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_1 = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(branch_1)
    branch_1 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(branch_1)

    # Concatenate and project (linear activation: scaling happens before ReLU).
    mixed = concatenate([branch_0, branch_1])
    up = Conv2D(filters=2048, kernel_size=(1, 1), padding='same', activation='linear')(mixed)

    # Residual addition: inputs + scale * up, then ReLU.
    out = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                 output_shape=backend.int_shape(inputs)[1:],
                 arguments={'scale': scale})([inputs, up])
    return Activation(activation='relu')(out)

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A block (full-size filter counts; redefines the
    scaled-down version from an earlier cell and shadows it).

    Halves the spatial resolution via three parallel stride-2 branches.

    Args:
        inputs: 4-D feature-map tensor.
        network_selected: one of "Inception-v4", "Inception-ResNet-v1",
            "Inception-ResNet-v2"; selects the (k, l, m, n) filter counts.
        names: unused; kept for interface compatibility.

    Returns:
        Channel-wise concatenation of the three stride-2 branch outputs.

    Raises:
        ValueError: if ``network_selected`` is not a recognised variant.
            (The original if/elif chain left k, l, m, n unbound in that
            case and crashed later with an obscure NameError.)
    """
    # (k, l, m, n) filter counts per network variant.
    filter_table = {
        "Inception-v4":        (192, 224, 256, 384),
        "Inception-ResNet-v1": (192, 192, 256, 384),
        "Inception-ResNet-v2": (256, 256, 384, 384),
    }
    try:
        k, l, m, n = filter_table[network_selected]
    except KeyError:
        raise ValueError("Unknown network_selected: %r (expected one of %s)"
                         % (network_selected, sorted(filter_table)))

    x = inputs

    # Branch 1: parameter-free stride-2 max-pool.
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 2: single stride-2 3x3 convolution.
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 3: 1x1 bottleneck, 3x3, then stride-2 3x3.
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)

    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def inception_resnet_v2_reduction_b_block(inputs,
                      names=None):
    """Reduction-B block of Inception-ResNet-v2: halves the spatial
    resolution via four parallel stride-2 branches.

    Args:
        inputs: 4-D feature-map tensor.
        names: unused; kept for interface compatibility.

    Returns:
        Channel-wise concatenation of the four stride-2 branch outputs.
    """
    # Branch 1: parameter-free stride-2 max-pool.
    pool_branch = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(inputs)

    # Branch 2: 1x1 bottleneck then stride-2 3x3 (384 filters).
    conv_branch_a = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    conv_branch_a = Conv2D(filters=384, kernel_size=(3, 3), strides=2, padding='valid')(conv_branch_a)

    # Branch 3: 1x1 bottleneck then stride-2 3x3 (256 filters).
    conv_branch_b = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    conv_branch_b = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(conv_branch_b)

    # Branch 4: 1x1, 3x3, then stride-2 3x3.
    deep_branch = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    deep_branch = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same')(deep_branch)
    deep_branch = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(deep_branch)

    return concatenate([pool_branch, conv_branch_a, conv_branch_b, deep_branch])

def create_inception_resnet_v2(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    """Build and compile an Inception-ResNet-v2 classifier.

    Structure: stem, 5 x ResNet-A blocks, Reduction-A, 10 x ResNet-B
    blocks, Reduction-B, 5 x ResNet-C blocks, then the dense head.
    No batch normalisation is inserted between blocks in this variant.

    Args:
        inputShape: (rows, cols, channels) of the input images; defaults
            read the notebook-level imgrows/imgclms/channel globals.
        num_classes: number of softmax output classes.

    Returns:
        A compiled tf.keras Model (categorical cross-entropy, Adam,
        accuracy metric).
    """
    NETWORK_SELECTED = "Inception-ResNet-v2"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)

    # Stage A: five residual A-blocks.
    for _ in range(5):
        x = inception_resnet_v2_a_block(x, scale=0.1)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)

    # Stage B: ten residual B-blocks.
    for _ in range(10):
        x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_reduction_b_block(x)

    # Stage C: five residual C-blocks.
    for _ in range(5):
        x = inception_resnet_v2_c_block(x, scale=0.1)

    # Classification head.
    x = AveragePooling2D(pool_size=(1, 1))(x)  # (1,1) avoids negative dimension at this input size
    x = Flatten()(x)
    x = Dense(1792)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax',
                    kernel_initializer='he_normal')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer="Adam",
                  metrics=['accuracy'])
    return model
In [18]:
# Modified # Inception-Res-v2 -> func: create_inception_resnet_v2()
def stem_block(inputs,
         names=None):
    """Stem of the Inception-ResNet-v2 network.

    NOTE(review): this is a byte-identical redefinition of the
    stem_block defined in the previous cell; it shadows that version
    for any later call. Consider deleting one copy or renaming them.

    Args:
        inputs: 4-D image tensor.
        names: unused; kept for interface compatibility.

    Returns:
        Downsampled feature-map tensor.
    """
    x = inputs  # NOTE(review): dead assignment — overwritten just below from `inputs`
    
    # Initial stride-2 convolution stack.
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    # First reduction: stride-2 max-pool alongside a stride-2 conv.
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    # Left branch: 1x1 bottleneck then 3x3 (valid).
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    
    # Right branch: 1x1, factorised 7x7 (7x1 then 1x7), then 3x3 (valid).
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    # Final reduction: stride-2 conv (zero-padded to match the pool) vs max-pool.
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_resnet_v2_a_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 'A' residual block.

    NOTE(review): byte-identical redefinition of the version in the
    previous cell; it shadows that version for later calls.

    Args:
        inputs: 4-D feature-map tensor; the residual add below requires
            it to carry 384 channels.
        scale: multiplier applied to the residual before the addition.
        names: unused; kept for interface compatibility.

    Returns:
        Tensor of the same shape as `inputs`, after residual add + ReLU.
    """
    x = inputs
    
    # Branch 1: single 1x1 conv.
    x_L_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    # Branch 2: 1x1 then 3x3.
    x_M_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x_M_1)
    
    # Branch 3: 1x1 then two stacked 3x3 convolutions.
    x_R_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x_R_2)
    
    # Concatenate and project (linear activation: scaling happens before ReLU).
    x_C_1 = concatenate([x_L_1, x_M_2, x_R_3])
    x_C_2 = Conv2D(filters=384, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    # Residual addition: x + scale * projection, then ReLU.
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_b_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 'B' residual block.

    NOTE(review): byte-identical redefinition of the version in the
    previous cell; it shadows that version for later calls.

    Args:
        inputs: 4-D feature-map tensor; the residual add below requires
            it to carry 1152 channels.
        scale: multiplier applied to the residual before the addition.
        names: unused; kept for interface compatibility.

    Returns:
        Tensor of the same shape as `inputs`, after residual add + ReLU.
    """
    x = inputs

    # Branch 1: single 1x1 conv.
    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    # Branch 2: 1x1 then factorised 7x7 (1x7 followed by 7x1).
    x_R_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=160, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=192, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    
    # Concatenate and project (linear activation: scaling happens before ReLU).
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=1152, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    # Residual addition: x + scale * projection, then ReLU.
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_c_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 'C' residual block.

    NOTE(review): byte-identical redefinition of the version in the
    previous cell; it shadows that version for later calls.

    Args:
        inputs: 4-D feature-map tensor; the residual add below requires
            it to carry 2048 channels.
        scale: multiplier applied to the residual before the addition.
        names: unused; kept for interface compatibility.

    Returns:
        Tensor of the same shape as `inputs`, after residual add + ReLU.
    """
    x = inputs

    # Branch 1: single 1x1 conv.
    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    # Branch 2: 1x1 then split 3x3 (1x3 followed by 3x1).
    x_R_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_R_2)
    
    # Concatenate and project (linear activation: scaling happens before ReLU).
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=2048, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    # Residual addition: x + scale * projection, then ReLU.
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A block (full-size filter counts; this redefinition
    shadows the earlier ones for later calls).

    Halves the spatial resolution via three parallel stride-2 branches.

    Args:
        inputs: 4-D feature-map tensor.
        network_selected: one of "Inception-v4", "Inception-ResNet-v1",
            "Inception-ResNet-v2"; selects the (k, l, m, n) filter counts.
        names: unused; kept for interface compatibility.

    Returns:
        Channel-wise concatenation of the three stride-2 branch outputs.

    Raises:
        ValueError: if ``network_selected`` is not a recognised variant.
            (The original if/elif chain left k, l, m, n unbound in that
            case and crashed later with an obscure NameError.)
    """
    # (k, l, m, n) filter counts per network variant.
    filter_table = {
        "Inception-v4":        (192, 224, 256, 384),
        "Inception-ResNet-v1": (192, 192, 256, 384),
        "Inception-ResNet-v2": (256, 256, 384, 384),
    }
    try:
        k, l, m, n = filter_table[network_selected]
    except KeyError:
        raise ValueError("Unknown network_selected: %r (expected one of %s)"
                         % (network_selected, sorted(filter_table)))

    x = inputs

    # Branch 1: parameter-free stride-2 max-pool.
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 2: single stride-2 3x3 convolution.
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 3: 1x1 bottleneck, 3x3, then stride-2 3x3.
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)

    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def inception_resnet_v2_reduction_b_block(inputs,
                      names=None):   
    """Reduction-B block of Inception-ResNet-v2: halves the spatial
    resolution via four parallel stride-2 branches.

    NOTE(review): byte-identical redefinition of the version in the
    previous cell; it shadows that version for later calls.

    Args:
        inputs: 4-D feature-map tensor.
        names: unused; kept for interface compatibility.

    Returns:
        Channel-wise concatenation of the four stride-2 branch outputs.
    """
    x = inputs
    
    # Branch 1: parameter-free stride-2 max-pool.
    x_EL_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    # Branch 2: 1x1 bottleneck then stride-2 3x3 (384 filters).
    x_ML_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ML_2 = Conv2D(filters=384, kernel_size=(3, 3), strides=2, padding='valid')(x_ML_1)
    
    # Branch 3: 1x1 bottleneck then stride-2 3x3 (256 filters).
    x_MR_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR_2 = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(x_MR_1)
    
    # Branch 4: 1x1, 3x3, then stride-2 3x3.
    x_ER_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER_2 = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same')(x_ER_1)
    x_ER_3 = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(x_ER_2)
    
    x = concatenate([x_EL_1, x_ML_2, x_MR_2, x_ER_3])
    return x

def create_inception_resnet_v2(inputShape=(imgrows, imgclms, channel),
                   num_classes=2,
                   batch_norm=True,
                   no_a_block=3,
                   no_b_block=5,
                   no_c_block=3):
    """Build and compile a modified (shallower) Inception-ResNet-v2.

    Each residual block is optionally preceded by BatchNormalization.
    The block counts and the batch-norm switch were hard-coded locals
    (3 / 5 / 3, True); they are now keyword parameters with the same
    defaults, so existing callers are unaffected.

    Args:
        inputShape: (rows, cols, channels) of the input images; defaults
            read the notebook-level imgrows/imgclms/channel globals.
        num_classes: number of softmax output classes.
        batch_norm: insert BatchNormalization before every residual block.
        no_a_block: number of Inception-ResNet-A blocks.
        no_b_block: number of Inception-ResNet-B blocks.
        no_c_block: number of Inception-ResNet-C blocks.

    Returns:
        A compiled tf.keras Model (categorical cross-entropy, Adam,
        accuracy metric).
    """
    NETWORK_SELECTED = "Inception-ResNet-v2"
    inputs = Input(shape=inputShape)

    x = stem_block(inputs)

    # Stage A.
    for _ in range(no_a_block):
        if batch_norm:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_a_block(x, scale=0.1)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)

    # Stage B.
    for _ in range(no_b_block):
        if batch_norm:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_reduction_b_block(x)

    # Stage C.
    for _ in range(no_c_block):
        if batch_norm:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_c_block(x, scale=0.1)

    # Classification head.
    x = AveragePooling2D(pool_size=(1, 1))(x)  # (1,1) avoids negative dimension at this input size
    x = Flatten()(x)
    x = Dense(1792)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax',
                    kernel_initializer='he_normal')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer="Adam",
                  metrics=['accuracy'])
    return model
In [19]:
# Setup the models
# NOTE(review): stem_block and reduction_a_block were redefined in later
# cells; Python resolves them when create_inception_v4() is CALLED, so the
# models built here use the LAST definitions (the Inception-ResNet-v2
# variants). The summary below confirms it: the first conv layer has 896
# params = 32 filters (the ResNet-v2 stem), not the 16-filter v4 stem.
# Rename the per-variant helpers to avoid this shadowing.
model       = create_inception_v4() # This is meant for training
modelGo     = create_inception_v4() # This is used for final testing

model.summary()
WARNING:tensorflow:From D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\keras\initializers.py:104: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with distribution=normal is deprecated and will be removed in a future version.
Instructions for updating:
`normal` is a deprecated alias for `truncated_normal`
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            (None, 96, 96, 3)    0                                            
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 47, 47, 32)   896         input_1[0][0]                    
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 45, 45, 32)   9248        conv2d[0][0]                     
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 45, 45, 64)   18496       conv2d_1[0][0]                   
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 22, 22, 64)   0           conv2d_2[0][0]                   
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 22, 22, 96)   55392       conv2d_2[0][0]                   
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 22, 22, 160)  0           max_pooling2d[0][0]              
                                                                 conv2d_3[0][0]                   
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 22, 22, 64)   10304       concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 22, 22, 64)   28736       conv2d_6[0][0]                   
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 22, 22, 64)   10304       concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 22, 22, 64)   28736       conv2d_7[0][0]                   
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 20, 20, 96)   55392       conv2d_4[0][0]                   
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 20, 20, 96)   55392       conv2d_8[0][0]                   
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 20, 20, 192)  0           conv2d_5[0][0]                   
                                                                 conv2d_9[0][0]                   
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 9, 9, 192)    331968      concatenate_1[0][0]              
__________________________________________________________________________________________________
zero_padding2d (ZeroPadding2D)  (None, 10, 10, 192)  0           conv2d_10[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 10, 10, 192)  0           concatenate_1[0][0]              
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 10, 10, 384)  0           zero_padding2d[0][0]             
                                                                 max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 10, 10, 384)  1536        concatenate_2[0][0]              
__________________________________________________________________________________________________
conv2d_15 (Conv2D)              (None, 10, 10, 32)   12320       batch_normalization[0][0]        
__________________________________________________________________________________________________
average_pooling2d (AveragePooli (None, 10, 10, 384)  0           batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_13 (Conv2D)              (None, 10, 10, 32)   12320       batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_16 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_15[0][0]                  
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 10, 10, 48)   18480       average_pooling2d[0][0]          
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 10, 10, 48)   18480       batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_14 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_13[0][0]                  
__________________________________________________________________________________________________
conv2d_17 (Conv2D)              (None, 10, 10, 48)   20784       conv2d_16[0][0]                  
__________________________________________________________________________________________________
concatenate_3 (Concatenate)     (None, 10, 10, 192)  0           conv2d_11[0][0]                  
                                                                 conv2d_12[0][0]                  
                                                                 conv2d_14[0][0]                  
                                                                 conv2d_17[0][0]                  
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 10, 10, 192)  768         concatenate_3[0][0]              
__________________________________________________________________________________________________
conv2d_22 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_1[0][0]      
__________________________________________________________________________________________________
average_pooling2d_1 (AveragePoo (None, 10, 10, 192)  0           batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_20 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_23 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_22[0][0]                  
__________________________________________________________________________________________________
conv2d_18 (Conv2D)              (None, 10, 10, 48)   9264        average_pooling2d_1[0][0]        
__________________________________________________________________________________________________
conv2d_19 (Conv2D)              (None, 10, 10, 48)   9264        batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_21 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_20[0][0]                  
__________________________________________________________________________________________________
conv2d_24 (Conv2D)              (None, 10, 10, 48)   20784       conv2d_23[0][0]                  
__________________________________________________________________________________________________
concatenate_4 (Concatenate)     (None, 10, 10, 192)  0           conv2d_18[0][0]                  
                                                                 conv2d_19[0][0]                  
                                                                 conv2d_21[0][0]                  
                                                                 conv2d_24[0][0]                  
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 10, 10, 192)  768         concatenate_4[0][0]              
__________________________________________________________________________________________________
conv2d_29 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_2[0][0]      
__________________________________________________________________________________________________
average_pooling2d_2 (AveragePoo (None, 10, 10, 192)  0           batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_27 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_30 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_29[0][0]                  
__________________________________________________________________________________________________
conv2d_25 (Conv2D)              (None, 10, 10, 48)   9264        average_pooling2d_2[0][0]        
__________________________________________________________________________________________________
conv2d_26 (Conv2D)              (None, 10, 10, 48)   9264        batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_28 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_27[0][0]                  
__________________________________________________________________________________________________
conv2d_31 (Conv2D)              (None, 10, 10, 48)   20784       conv2d_30[0][0]                  
__________________________________________________________________________________________________
concatenate_5 (Concatenate)     (None, 10, 10, 192)  0           conv2d_25[0][0]                  
                                                                 conv2d_26[0][0]                  
                                                                 conv2d_28[0][0]                  
                                                                 conv2d_31[0][0]                  
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 10, 10, 192)  768         concatenate_5[0][0]              
__________________________________________________________________________________________________
conv2d_36 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_3[0][0]      
__________________________________________________________________________________________________
average_pooling2d_3 (AveragePoo (None, 10, 10, 192)  0           batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_34 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_37 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_36[0][0]                  
__________________________________________________________________________________________________
conv2d_32 (Conv2D)              (None, 10, 10, 48)   9264        average_pooling2d_3[0][0]        
__________________________________________________________________________________________________
conv2d_33 (Conv2D)              (None, 10, 10, 48)   9264        batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_35 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_34[0][0]                  
__________________________________________________________________________________________________
conv2d_38 (Conv2D)              (None, 10, 10, 48)   20784       conv2d_37[0][0]                  
__________________________________________________________________________________________________
concatenate_6 (Concatenate)     (None, 10, 10, 192)  0           conv2d_32[0][0]                  
                                                                 conv2d_33[0][0]                  
                                                                 conv2d_35[0][0]                  
                                                                 conv2d_38[0][0]                  
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 10, 10, 192)  768         concatenate_6[0][0]              
__________________________________________________________________________________________________
conv2d_40 (Conv2D)              (None, 10, 10, 192)  37056       batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_41 (Conv2D)              (None, 10, 10, 224)  387296      conv2d_40[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)  (None, 4, 4, 192)    0           batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_39 (Conv2D)              (None, 4, 4, 384)    663936      batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_42 (Conv2D)              (None, 4, 4, 256)    516352      conv2d_41[0][0]                  
__________________________________________________________________________________________________
concatenate_7 (Concatenate)     (None, 4, 4, 832)    0           max_pooling2d_2[0][0]            
                                                                 conv2d_39[0][0]                  
                                                                 conv2d_42[0][0]                  
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 4, 4, 832)    3328        concatenate_7[0][0]              
__________________________________________________________________________________________________
conv2d_48 (Conv2D)              (None, 4, 4, 96)     79968       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_49 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_48[0][0]                  
__________________________________________________________________________________________________
conv2d_45 (Conv2D)              (None, 4, 4, 96)     79968       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_50 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_49[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_4 (AveragePoo (None, 4, 4, 832)    0           batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_46 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_45[0][0]                  
__________________________________________________________________________________________________
conv2d_51 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_50[0][0]                  
__________________________________________________________________________________________________
conv2d_43 (Conv2D)              (None, 4, 4, 64)     53312       average_pooling2d_4[0][0]        
__________________________________________________________________________________________________
conv2d_44 (Conv2D)              (None, 4, 4, 192)    159936      batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_47 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_46[0][0]                  
__________________________________________________________________________________________________
conv2d_52 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_51[0][0]                  
__________________________________________________________________________________________________
concatenate_8 (Concatenate)     (None, 4, 4, 512)    0           conv2d_43[0][0]                  
                                                                 conv2d_44[0][0]                  
                                                                 conv2d_47[0][0]                  
                                                                 conv2d_52[0][0]                  
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 4, 4, 512)    2048        concatenate_8[0][0]              
__________________________________________________________________________________________________
conv2d_58 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_59 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_58[0][0]                  
__________________________________________________________________________________________________
conv2d_55 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_60 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_59[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_5 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_56 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_55[0][0]                  
__________________________________________________________________________________________________
conv2d_61 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_60[0][0]                  
__________________________________________________________________________________________________
conv2d_53 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_5[0][0]        
__________________________________________________________________________________________________
conv2d_54 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_57 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_56[0][0]                  
__________________________________________________________________________________________________
conv2d_62 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_61[0][0]                  
__________________________________________________________________________________________________
concatenate_9 (Concatenate)     (None, 4, 4, 512)    0           conv2d_53[0][0]                  
                                                                 conv2d_54[0][0]                  
                                                                 conv2d_57[0][0]                  
                                                                 conv2d_62[0][0]                  
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 4, 4, 512)    2048        concatenate_9[0][0]              
__________________________________________________________________________________________________
conv2d_68 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_69 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_68[0][0]                  
__________________________________________________________________________________________________
conv2d_65 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_70 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_69[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_6 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_66 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_65[0][0]                  
__________________________________________________________________________________________________
conv2d_71 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_70[0][0]                  
__________________________________________________________________________________________________
conv2d_63 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_6[0][0]        
__________________________________________________________________________________________________
conv2d_64 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_67 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_66[0][0]                  
__________________________________________________________________________________________________
conv2d_72 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_71[0][0]                  
__________________________________________________________________________________________________
concatenate_10 (Concatenate)    (None, 4, 4, 512)    0           conv2d_63[0][0]                  
                                                                 conv2d_64[0][0]                  
                                                                 conv2d_67[0][0]                  
                                                                 conv2d_72[0][0]                  
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 4, 4, 512)    2048        concatenate_10[0][0]             
__________________________________________________________________________________________________
conv2d_78 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_79 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_78[0][0]                  
__________________________________________________________________________________________________
conv2d_75 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_80 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_79[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_7 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_76 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_75[0][0]                  
__________________________________________________________________________________________________
conv2d_81 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_80[0][0]                  
__________________________________________________________________________________________________
conv2d_73 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_7[0][0]        
__________________________________________________________________________________________________
conv2d_74 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_77 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_76[0][0]                  
__________________________________________________________________________________________________
conv2d_82 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_81[0][0]                  
__________________________________________________________________________________________________
concatenate_11 (Concatenate)    (None, 4, 4, 512)    0           conv2d_73[0][0]                  
                                                                 conv2d_74[0][0]                  
                                                                 conv2d_77[0][0]                  
                                                                 conv2d_82[0][0]                  
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 4, 4, 512)    2048        concatenate_11[0][0]             
__________________________________________________________________________________________________
conv2d_88 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_89 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_88[0][0]                  
__________________________________________________________________________________________________
conv2d_85 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_90 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_89[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_8 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_86 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_85[0][0]                  
__________________________________________________________________________________________________
conv2d_91 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_90[0][0]                  
__________________________________________________________________________________________________
conv2d_83 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_8[0][0]        
__________________________________________________________________________________________________
conv2d_84 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_87 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_86[0][0]                  
__________________________________________________________________________________________________
conv2d_92 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_91[0][0]                  
__________________________________________________________________________________________________
concatenate_12 (Concatenate)    (None, 4, 4, 512)    0           conv2d_83[0][0]                  
                                                                 conv2d_84[0][0]                  
                                                                 conv2d_87[0][0]                  
                                                                 conv2d_92[0][0]                  
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 4, 4, 512)    2048        concatenate_12[0][0]             
__________________________________________________________________________________________________
conv2d_98 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_99 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_98[0][0]                  
__________________________________________________________________________________________________
conv2d_95 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_100 (Conv2D)             (None, 4, 4, 112)    75376       conv2d_99[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_9 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_96 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_95[0][0]                  
__________________________________________________________________________________________________
conv2d_101 (Conv2D)             (None, 4, 4, 112)    87920       conv2d_100[0][0]                 
__________________________________________________________________________________________________
conv2d_93 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_9[0][0]        
__________________________________________________________________________________________________
conv2d_94 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_97 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_96[0][0]                  
__________________________________________________________________________________________________
conv2d_102 (Conv2D)             (None, 4, 4, 128)    100480      conv2d_101[0][0]                 
__________________________________________________________________________________________________
concatenate_13 (Concatenate)    (None, 4, 4, 512)    0           conv2d_93[0][0]                  
                                                                 conv2d_94[0][0]                  
                                                                 conv2d_97[0][0]                  
                                                                 conv2d_102[0][0]                 
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 4, 4, 512)    2048        concatenate_13[0][0]             
__________________________________________________________________________________________________
conv2d_108 (Conv2D)             (None, 4, 4, 96)     49248       batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_109 (Conv2D)             (None, 4, 4, 96)     64608       conv2d_108[0][0]                 
__________________________________________________________________________________________________
conv2d_105 (Conv2D)             (None, 4, 4, 96)     49248       batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_110 (Conv2D)             (None, 4, 4, 112)    75376       conv2d_109[0][0]                 
__________________________________________________________________________________________________
average_pooling2d_10 (AveragePo (None, 4, 4, 512)    0           batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_106 (Conv2D)             (None, 4, 4, 112)    75376       conv2d_105[0][0]                 
__________________________________________________________________________________________________
conv2d_111 (Conv2D)             (None, 4, 4, 112)    87920       conv2d_110[0][0]                 
__________________________________________________________________________________________________
conv2d_103 (Conv2D)             (None, 4, 4, 64)     32832       average_pooling2d_10[0][0]       
__________________________________________________________________________________________________
conv2d_104 (Conv2D)             (None, 4, 4, 192)    98496       batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_107 (Conv2D)             (None, 4, 4, 128)    100480      conv2d_106[0][0]                 
__________________________________________________________________________________________________
conv2d_112 (Conv2D)             (None, 4, 4, 128)    100480      conv2d_111[0][0]                 
__________________________________________________________________________________________________
concatenate_14 (Concatenate)    (None, 4, 4, 512)    0           conv2d_103[0][0]                 
                                                                 conv2d_104[0][0]                 
                                                                 conv2d_107[0][0]                 
                                                                 conv2d_112[0][0]                 
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 4, 4, 512)    2048        concatenate_14[0][0]             
__________________________________________________________________________________________________
conv2d_115 (Conv2D)             (None, 4, 4, 128)    65664       batch_normalization_12[0][0]     
__________________________________________________________________________________________________
conv2d_116 (Conv2D)             (None, 4, 4, 128)    114816      conv2d_115[0][0]                 
__________________________________________________________________________________________________
conv2d_113 (Conv2D)             (None, 4, 4, 96)     49248       batch_normalization_12[0][0]     
__________________________________________________________________________________________________
conv2d_117 (Conv2D)             (None, 4, 4, 160)    143520      conv2d_116[0][0]                 
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)  (None, 1, 1, 512)    0           batch_normalization_12[0][0]     
__________________________________________________________________________________________________
conv2d_114 (Conv2D)             (None, 1, 1, 96)     83040       conv2d_113[0][0]                 
__________________________________________________________________________________________________
conv2d_118 (Conv2D)             (None, 1, 1, 160)    230560      conv2d_117[0][0]                 
__________________________________________________________________________________________________
concatenate_15 (Concatenate)    (None, 1, 1, 768)    0           max_pooling2d_3[0][0]            
                                                                 conv2d_114[0][0]                 
                                                                 conv2d_118[0][0]                 
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 1, 1, 768)    3072        concatenate_15[0][0]             
__________________________________________________________________________________________________
conv2d_124 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_125 (Conv2D)             (None, 1, 1, 224)    129248      conv2d_124[0][0]                 
__________________________________________________________________________________________________
average_pooling2d_11 (AveragePo (None, 1, 1, 768)    0           batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_121 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_126 (Conv2D)             (None, 1, 1, 256)    172288      conv2d_125[0][0]                 
__________________________________________________________________________________________________
conv2d_119 (Conv2D)             (None, 1, 1, 128)    98432       average_pooling2d_11[0][0]       
__________________________________________________________________________________________________
conv2d_120 (Conv2D)             (None, 1, 1, 128)    98432       batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_122 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_121[0][0]                 
__________________________________________________________________________________________________
conv2d_123 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_121[0][0]                 
__________________________________________________________________________________________________
conv2d_127 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_126[0][0]                 
__________________________________________________________________________________________________
conv2d_128 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_126[0][0]                 
__________________________________________________________________________________________________
concatenate_16 (Concatenate)    (None, 1, 1, 768)    0           conv2d_119[0][0]                 
                                                                 conv2d_120[0][0]                 
                                                                 conv2d_122[0][0]                 
                                                                 conv2d_123[0][0]                 
                                                                 conv2d_127[0][0]                 
                                                                 conv2d_128[0][0]                 
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 1, 1, 768)    3072        concatenate_16[0][0]             
__________________________________________________________________________________________________
conv2d_134 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_135 (Conv2D)             (None, 1, 1, 224)    129248      conv2d_134[0][0]                 
__________________________________________________________________________________________________
average_pooling2d_12 (AveragePo (None, 1, 1, 768)    0           batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_131 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_136 (Conv2D)             (None, 1, 1, 256)    172288      conv2d_135[0][0]                 
__________________________________________________________________________________________________
conv2d_129 (Conv2D)             (None, 1, 1, 128)    98432       average_pooling2d_12[0][0]       
__________________________________________________________________________________________________
conv2d_130 (Conv2D)             (None, 1, 1, 128)    98432       batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_132 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_131[0][0]                 
__________________________________________________________________________________________________
conv2d_133 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_131[0][0]                 
__________________________________________________________________________________________________
conv2d_137 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_136[0][0]                 
__________________________________________________________________________________________________
conv2d_138 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_136[0][0]                 
__________________________________________________________________________________________________
concatenate_17 (Concatenate)    (None, 1, 1, 768)    0           conv2d_129[0][0]                 
                                                                 conv2d_130[0][0]                 
                                                                 conv2d_132[0][0]                 
                                                                 conv2d_133[0][0]                 
                                                                 conv2d_137[0][0]                 
                                                                 conv2d_138[0][0]                 
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 1, 1, 768)    3072        concatenate_17[0][0]             
__________________________________________________________________________________________________
conv2d_144 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_145 (Conv2D)             (None, 1, 1, 224)    129248      conv2d_144[0][0]                 
__________________________________________________________________________________________________
average_pooling2d_13 (AveragePo (None, 1, 1, 768)    0           batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_141 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_146 (Conv2D)             (None, 1, 1, 256)    172288      conv2d_145[0][0]                 
__________________________________________________________________________________________________
conv2d_139 (Conv2D)             (None, 1, 1, 128)    98432       average_pooling2d_13[0][0]       
__________________________________________________________________________________________________
conv2d_140 (Conv2D)             (None, 1, 1, 128)    98432       batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_142 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_141[0][0]                 
__________________________________________________________________________________________________
conv2d_143 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_141[0][0]                 
__________________________________________________________________________________________________
conv2d_147 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_146[0][0]                 
__________________________________________________________________________________________________
conv2d_148 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_146[0][0]                 
__________________________________________________________________________________________________
concatenate_18 (Concatenate)    (None, 1, 1, 768)    0           conv2d_139[0][0]                 
                                                                 conv2d_140[0][0]                 
                                                                 conv2d_142[0][0]                 
                                                                 conv2d_143[0][0]                 
                                                                 conv2d_147[0][0]                 
                                                                 conv2d_148[0][0]                 
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 1, 1, 768)    3072        concatenate_18[0][0]             
__________________________________________________________________________________________________
average_pooling2d_14 (AveragePo (None, 1, 1, 768)    0           batch_normalization_16[0][0]     
__________________________________________________________________________________________________
flatten (Flatten)               (None, 768)          0           average_pooling2d_14[0][0]       
__________________________________________________________________________________________________
dense (Dense)                   (None, 256)          196864      flatten[0][0]                    
__________________________________________________________________________________________________
dropout (Dropout)               (None, 256)          0           dense[0][0]                      
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 2)            514         dropout[0][0]                    
==================================================================================================
Total params: 12,173,266
Trainable params: 12,155,986
Non-trainable params: 17,280
__________________________________________________________________________________________________
In [37]:
# Resume training from a previously saved best checkpoint.
# Set to False to start from the freshly initialised weights instead.
continue_training = True

if continue_training:
    # NOTE(review): "_best9305" presumably encodes the 93.05% val accuracy
    # of that checkpoint — confirm the file exists before a fresh run.
    best_model_filepath = modelname + "_best9305" + ".hdf5"
    model.load_weights(best_model_filepath)
In [38]:
# Create checkpoint for the training
# This checkpoint performs model saving when
# an epoch gives highest testing accuracy
# filepath        = modelname + ".hdf5"
# checkpoint      = ModelCheckpoint(filepath, 
#                                   monitor='val_acc', 
#                                   verbose=0, 
#                                   save_best_only=True, 
#                                   mode='max')

#                             # Log the epoch detail into csv
# csv_logger      = CSVLogger(modelname +'.csv')
# callbacks_list  = [checkpoint, csv_logger]

def lrSchedule(epoch):
    """Piecewise-constant learning-rate schedule for LearningRateScheduler.

    Starts at 1e-3 and shrinks the rate at epochs 150/200/240/270
    (multiplying the base by 1e-1, 1e-2, 1e-3 and 0.5e-3 respectively).

    Args:
        epoch: zero-based epoch index supplied by Keras.

    Returns:
        The learning rate (float) to use for this epoch.
    """
    base = 1e-3

    # Pick the decay factor for the regime this epoch falls into.
    if epoch > 270:       # was 190 in an earlier run
        factor = 0.5e-3
    elif epoch > 240:     # was 160
        factor = 1e-3
    elif epoch > 200:     # was 140
        factor = 1e-2
    elif epoch > 150:     # was 100
        factor = 1e-1
    else:
        factor = 1.0

    lr = base * factor
    print('Learning rate: ', lr)

    return lr

# Wrap the schedule function so Keras applies it at each epoch start.
LRScheduler     = LearningRateScheduler(lrSchedule)

                            # Create checkpoint for the training.
                            # This checkpoint saves the model weights whenever
                            # an epoch achieves the highest validation accuracy
                            # seen so far (save_best_only + mode='max').
filepath        = modelname + ".hdf5"
checkpoint      = ModelCheckpoint(filepath, 
                                  monitor='val_acc', 
                                  verbose=0, 
                                  save_best_only=True, 
                                  mode='max')

                            # Log per-epoch loss/accuracy into a CSV file
                            # (plotted in a later cell).
csv_logger      = CSVLogger(modelname +'.csv')
callbacks_list  = [checkpoint, csv_logger, LRScheduler]
In [ ]:
# Fit the model
# This is where the training starts
# model.fit(trDat, 
#           trLbl, 
#           validation_data=(tsDat, tsLbl), 
#           epochs=60, 
#           batch_size=32,
#           callbacks=callbacks_list)

# Train with real-time data augmentation: random shifts, rotations,
# zooms and horizontal flips of the training images.
batch_size = 16   # single definition, used by both the generator and the step count

datagen = ImageDataGenerator(width_shift_range=0.25,
                             height_shift_range=0.25,
                             rotation_range=45,
                             zoom_range=0.8,
                             fill_mode='nearest',
                             horizontal_flip=True,
                             vertical_flip=False)

# steps_per_epoch must be an integer; the original len(trDat)/16 was a
# float, which only works by accident on some Keras versions.
model.fit_generator(datagen.flow(trDat, trLbl, batch_size=batch_size),
                    validation_data=(tsDat, tsLbl),
                    epochs=300,
                    verbose=1,
                    steps_per_epoch=math.ceil(len(trDat) / batch_size),
                    callbacks=callbacks_list)
Learning rate:  0.001
Epoch 1/300
924/924 [==============================] - 140s 152ms/step - loss: 0.2603 - acc: 0.8994 - val_loss: 0.2142 - val_acc: 0.9215
Learning rate:  0.001
Epoch 2/300
924/924 [==============================] - 134s 145ms/step - loss: 0.2588 - acc: 0.8997 - val_loss: 0.2181 - val_acc: 0.9169
Learning rate:  0.001
Epoch 3/300
924/924 [==============================] - 145s 157ms/step - loss: 0.2590 - acc: 0.8968 - val_loss: 0.2173 - val_acc: 0.9140
Learning rate:  0.001
Epoch 4/300
924/924 [==============================] - 137s 149ms/step - loss: 0.2607 - acc: 0.8983 - val_loss: 0.2308 - val_acc: 0.9121
Learning rate:  0.001
Epoch 5/300
924/924 [==============================] - 136s 148ms/step - loss: 0.2598 - acc: 0.8965 - val_loss: 0.2068 - val_acc: 0.9232
Learning rate:  0.001
Epoch 6/300
924/924 [==============================] - 136s 147ms/step - loss: 0.2620 - acc: 0.8986 - val_loss: 0.2190 - val_acc: 0.9177
Learning rate:  0.001
Epoch 7/300
924/924 [==============================] - 139s 150ms/step - loss: 0.2522 - acc: 0.9004 - val_loss: 0.2215 - val_acc: 0.9164
Learning rate:  0.001
Epoch 8/300
924/924 [==============================] - 139s 150ms/step - loss: 0.2625 - acc: 0.8983 - val_loss: 0.2179 - val_acc: 0.9280
Learning rate:  0.001
Epoch 9/300
924/924 [==============================] - 141s 153ms/step - loss: 0.2613 - acc: 0.8996 - val_loss: 0.2080 - val_acc: 0.9205
Learning rate:  0.001
Epoch 10/300
924/924 [==============================] - 141s 153ms/step - loss: 0.2550 - acc: 0.8998 - val_loss: 0.2038 - val_acc: 0.9264
Learning rate:  0.001
Epoch 11/300
924/924 [==============================] - 137s 148ms/step - loss: 0.2566 - acc: 0.8984 - val_loss: 0.2048 - val_acc: 0.9283
Learning rate:  0.001
Epoch 12/300
924/924 [==============================] - 138s 150ms/step - loss: 0.2546 - acc: 0.8977 - val_loss: 0.2069 - val_acc: 0.9234
Learning rate:  0.001
Epoch 13/300
924/924 [==============================] - 134s 145ms/step - loss: 0.2556 - acc: 0.9006 - val_loss: 0.2524 - val_acc: 0.9029
Learning rate:  0.001
Epoch 14/300
924/924 [==============================] - 135s 146ms/step - loss: 0.2607 - acc: 0.8963 - val_loss: 0.2350 - val_acc: 0.9137
Learning rate:  0.001
Epoch 15/300
924/924 [==============================] - 130s 141ms/step - loss: 0.2571 - acc: 0.8977 - val_loss: 0.2383 - val_acc: 0.9218
Learning rate:  0.001
Epoch 16/300
924/924 [==============================] - 126s 136ms/step - loss: 0.2611 - acc: 0.8997 - val_loss: 0.1987 - val_acc: 0.9251
Learning rate:  0.001
Epoch 17/300
924/924 [==============================] - 133s 144ms/step - loss: 0.2538 - acc: 0.9004 - val_loss: 0.2093 - val_acc: 0.9315
Learning rate:  0.001
Epoch 18/300
924/924 [==============================] - 128s 138ms/step - loss: 0.2550 - acc: 0.8982 - val_loss: 0.2001 - val_acc: 0.9213
Learning rate:  0.001
Epoch 19/300
924/924 [==============================] - 125s 136ms/step - loss: 0.2599 - acc: 0.8994 - val_loss: 0.2008 - val_acc: 0.9286
Learning rate:  0.001
Epoch 20/300
924/924 [==============================] - 127s 138ms/step - loss: 0.2594 - acc: 0.8980 - val_loss: 0.1995 - val_acc: 0.9245
Learning rate:  0.001
Epoch 21/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2587 - acc: 0.8989 - val_loss: 0.2223 - val_acc: 0.9213
Learning rate:  0.001
Epoch 22/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2573 - acc: 0.9007 - val_loss: 0.2087 - val_acc: 0.9272
Learning rate:  0.001
Epoch 23/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2511 - acc: 0.9008 - val_loss: 0.1942 - val_acc: 0.9256
Learning rate:  0.001
Epoch 24/300
924/924 [==============================] - 122s 133ms/step - loss: 0.2574 - acc: 0.8986 - val_loss: 0.2607 - val_acc: 0.8985
Learning rate:  0.001
Epoch 25/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2602 - acc: 0.8995 - val_loss: 0.1934 - val_acc: 0.9297
Learning rate:  0.001
Epoch 26/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2512 - acc: 0.9029 - val_loss: 0.2170 - val_acc: 0.9248
Learning rate:  0.001
Epoch 27/300
924/924 [==============================] - 133s 144ms/step - loss: 0.2520 - acc: 0.9019 - val_loss: 0.1933 - val_acc: 0.9278
Learning rate:  0.001
Epoch 28/300
924/924 [==============================] - 141s 152ms/step - loss: 0.2576 - acc: 0.8997 - val_loss: 0.1869 - val_acc: 0.9318
Learning rate:  0.001
Epoch 29/300
924/924 [==============================] - 137s 148ms/step - loss: 0.2548 - acc: 0.9015 - val_loss: 0.2382 - val_acc: 0.9242
Learning rate:  0.001
Epoch 30/300
924/924 [==============================] - 134s 146ms/step - loss: 0.2541 - acc: 0.9004 - val_loss: 0.2028 - val_acc: 0.9299
Learning rate:  0.001
Epoch 31/300
924/924 [==============================] - 144s 156ms/step - loss: 0.2576 - acc: 0.9017 - val_loss: 0.2035 - val_acc: 0.9329
Learning rate:  0.001
Epoch 32/300
924/924 [==============================] - 137s 148ms/step - loss: 0.2487 - acc: 0.9007 - val_loss: 0.1917 - val_acc: 0.9305
Learning rate:  0.001
Epoch 33/300
924/924 [==============================] - 138s 150ms/step - loss: 0.2517 - acc: 0.9017 - val_loss: 0.1952 - val_acc: 0.9315
Learning rate:  0.001
Epoch 34/300
924/924 [==============================] - 136s 147ms/step - loss: 0.2530 - acc: 0.9032 - val_loss: 0.2157 - val_acc: 0.9153
Learning rate:  0.001
Epoch 35/300
924/924 [==============================] - 139s 151ms/step - loss: 0.2571 - acc: 0.9017 - val_loss: 0.2191 - val_acc: 0.9299
Learning rate:  0.001
Epoch 36/300
924/924 [==============================] - 127s 138ms/step - loss: 0.2470 - acc: 0.9044 - val_loss: 0.2019 - val_acc: 0.9210
Learning rate:  0.001
Epoch 37/300
924/924 [==============================] - 130s 141ms/step - loss: 0.2527 - acc: 0.9023 - val_loss: 0.2250 - val_acc: 0.9110
Learning rate:  0.001
Epoch 38/300
924/924 [==============================] - 123s 133ms/step - loss: 0.2520 - acc: 0.9027 - val_loss: 0.2598 - val_acc: 0.8920
Learning rate:  0.001
Epoch 39/300
924/924 [==============================] - 125s 136ms/step - loss: 0.2470 - acc: 0.9023 - val_loss: 0.1860 - val_acc: 0.9307
Learning rate:  0.001
Epoch 40/300
924/924 [==============================] - 135s 147ms/step - loss: 0.2520 - acc: 0.9037 - val_loss: 0.2238 - val_acc: 0.9191
Learning rate:  0.001
Epoch 41/300
924/924 [==============================] - 131s 142ms/step - loss: 0.2494 - acc: 0.9016 - val_loss: 0.1956 - val_acc: 0.9286
Learning rate:  0.001
Epoch 42/300
924/924 [==============================] - 125s 135ms/step - loss: 0.2534 - acc: 0.9019 - val_loss: 0.2071 - val_acc: 0.9261
Learning rate:  0.001
Epoch 43/300
924/924 [==============================] - 126s 137ms/step - loss: 0.2492 - acc: 0.9041 - val_loss: 0.1848 - val_acc: 0.9329
Learning rate:  0.001
Epoch 44/300
924/924 [==============================] - 123s 133ms/step - loss: 0.2505 - acc: 0.9023 - val_loss: 0.1932 - val_acc: 0.9240
Learning rate:  0.001
Epoch 45/300
924/924 [==============================] - 127s 137ms/step - loss: 0.2501 - acc: 0.9000 - val_loss: 0.2062 - val_acc: 0.9215
Learning rate:  0.001
Epoch 46/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2491 - acc: 0.9029 - val_loss: 0.1889 - val_acc: 0.9318
Learning rate:  0.001
Epoch 47/300
924/924 [==============================] - 126s 136ms/step - loss: 0.2434 - acc: 0.9053 - val_loss: 0.2121 - val_acc: 0.9183
Learning rate:  0.001
Epoch 48/300
924/924 [==============================] - 127s 138ms/step - loss: 0.2487 - acc: 0.9042 - val_loss: 0.2032 - val_acc: 0.9237
Learning rate:  0.001
Epoch 49/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2508 - acc: 0.9045 - val_loss: 0.1976 - val_acc: 0.9315
Learning rate:  0.001
Epoch 50/300
924/924 [==============================] - 123s 133ms/step - loss: 0.2468 - acc: 0.9057 - val_loss: 0.1915 - val_acc: 0.9275
Learning rate:  0.001
Epoch 51/300
924/924 [==============================] - 125s 135ms/step - loss: 0.2482 - acc: 0.9017 - val_loss: 0.2039 - val_acc: 0.9232
Learning rate:  0.001
Epoch 52/300
924/924 [==============================] - 125s 135ms/step - loss: 0.2439 - acc: 0.9040 - val_loss: 0.1870 - val_acc: 0.9307
Learning rate:  0.001
Epoch 53/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2473 - acc: 0.9021 - val_loss: 0.2090 - val_acc: 0.9223
Learning rate:  0.001
Epoch 54/300
924/924 [==============================] - 125s 136ms/step - loss: 0.2544 - acc: 0.9026 - val_loss: 0.1864 - val_acc: 0.9310
Learning rate:  0.001
Epoch 55/300
924/924 [==============================] - 124s 135ms/step - loss: 0.2441 - acc: 0.9045 - val_loss: 0.2064 - val_acc: 0.9299
Learning rate:  0.001
Epoch 56/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2483 - acc: 0.9022 - val_loss: 0.2016 - val_acc: 0.9302
Learning rate:  0.001
Epoch 57/300
924/924 [==============================] - 124s 135ms/step - loss: 0.2501 - acc: 0.9060 - val_loss: 0.2030 - val_acc: 0.9245
Learning rate:  0.001
Epoch 58/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2498 - acc: 0.9025 - val_loss: 0.1907 - val_acc: 0.9299
Learning rate:  0.001
Epoch 59/300
924/924 [==============================] - 126s 137ms/step - loss: 0.2514 - acc: 0.9039 - val_loss: 0.2043 - val_acc: 0.9248
Learning rate:  0.001
Epoch 60/300
924/924 [==============================] - 125s 135ms/step - loss: 0.2390 - acc: 0.9051 - val_loss: 0.2226 - val_acc: 0.9261
Learning rate:  0.001
Epoch 61/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2445 - acc: 0.9023 - val_loss: 0.1824 - val_acc: 0.9315
Learning rate:  0.001
Epoch 62/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2492 - acc: 0.9034 - val_loss: 0.2133 - val_acc: 0.9148
Learning rate:  0.001
Epoch 63/300
924/924 [==============================] - 126s 136ms/step - loss: 0.2414 - acc: 0.9061 - val_loss: 0.1840 - val_acc: 0.9313
Learning rate:  0.001
Epoch 64/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2480 - acc: 0.9033 - val_loss: 0.1988 - val_acc: 0.9297
Learning rate:  0.001
Epoch 65/300
924/924 [==============================] - 123s 133ms/step - loss: 0.2416 - acc: 0.9068 - val_loss: 0.2201 - val_acc: 0.9169
Learning rate:  0.001
Epoch 66/300
924/924 [==============================] - 125s 135ms/step - loss: 0.2406 - acc: 0.9041 - val_loss: 0.1825 - val_acc: 0.9288
Learning rate:  0.001
Epoch 67/300
924/924 [==============================] - 126s 136ms/step - loss: 0.2424 - acc: 0.9058 - val_loss: 0.1907 - val_acc: 0.9291
Learning rate:  0.001
Epoch 68/300
924/924 [==============================] - 125s 136ms/step - loss: 0.2486 - acc: 0.9023 - val_loss: 0.1955 - val_acc: 0.9269
Learning rate:  0.001
Epoch 69/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2399 - acc: 0.9056 - val_loss: 0.1920 - val_acc: 0.9261
Learning rate:  0.001
Epoch 70/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2512 - acc: 0.8998 - val_loss: 0.2023 - val_acc: 0.9286
Learning rate:  0.001
Epoch 71/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2445 - acc: 0.9037 - val_loss: 0.1956 - val_acc: 0.9291
Learning rate:  0.001
Epoch 72/300
924/924 [==============================] - 126s 137ms/step - loss: 0.2431 - acc: 0.9059 - val_loss: 0.1795 - val_acc: 0.9302
Learning rate:  0.001
Epoch 73/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2460 - acc: 0.9040 - val_loss: 0.2035 - val_acc: 0.9234
Learning rate:  0.001
Epoch 74/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2468 - acc: 0.9032 - val_loss: 0.1847 - val_acc: 0.9329
Learning rate:  0.001
Epoch 75/300
924/924 [==============================] - 128s 138ms/step - loss: 0.2446 - acc: 0.9035 - val_loss: 0.1931 - val_acc: 0.9291
Learning rate:  0.001
Epoch 76/300
924/924 [==============================] - 123s 133ms/step - loss: 0.2424 - acc: 0.9067 - val_loss: 0.2006 - val_acc: 0.9351
Learning rate:  0.001
Epoch 77/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2435 - acc: 0.9052 - val_loss: 0.2289 - val_acc: 0.9251
Learning rate:  0.001
Epoch 78/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2448 - acc: 0.9054 - val_loss: 0.2031 - val_acc: 0.9232
Learning rate:  0.001
Epoch 79/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2428 - acc: 0.9065 - val_loss: 0.2092 - val_acc: 0.9196
Learning rate:  0.001
Epoch 80/300
924/924 [==============================] - 129s 139ms/step - loss: 0.2423 - acc: 0.9081 - val_loss: 0.1988 - val_acc: 0.9291
Learning rate:  0.001
Epoch 81/300
923/924 [============================>.] - ETA: 0s - loss: 0.2404 - acc: 0.9061
In [40]:
# Now that training is complete, load the best checkpointed weights
# into a second model object (modelGo, built in an earlier cell) and
# compile it so we can run the final evaluation on the test set.
modelGo.load_weights(filepath)
modelGo.compile(loss='categorical_crossentropy', 
                optimizer='adam', 
                metrics=['accuracy'])
In [41]:
# Run the best model over the held-out test set and report metrics.
predicts = modelGo.predict(tsDat)

# Collapse the one-hot / softmax outputs to class indices
# for the sklearn classification report.
predout = predicts.argmax(axis=1)
testout = tsLbl.argmax(axis=1)

# Human-readable class names for the report
labelname = ['non-flower', 'flower']

testScores = metrics.accuracy_score(testout, predout)
confusion  = metrics.confusion_matrix(testout, predout)

print("Best accuracy (on testing dataset): %.2f%%" % (testScores*100))
print(metrics.classification_report(testout, predout, target_names=labelname, digits=4))
print(confusion)
Best accuracy (on testing dataset): 94.18%
              precision    recall  f1-score   support

  non-flower     0.9047    0.9516    0.9275      1446
      flower     0.9678    0.9356    0.9514      2250

    accuracy                         0.9418      3696
   macro avg     0.9362    0.9436    0.9395      3696
weighted avg     0.9431    0.9418    0.9421      3696

[[1376   70]
 [ 145 2105]]
In [42]:
import pandas as pd

# Plot the training history that CSVLogger wrote out:
# top panel = loss curves, bottom panel = accuracy curves
# (validation plotted first, then training, in each panel).
history = pd.read_csv(modelname + '.csv')

plt.figure()

plt.subplot(211)
plt.plot(history['val_loss'])
plt.plot(history['loss'])
plt.yticks([0, 0.20, 0.30, 0.4, 0.5])
plt.title('Loss value', fontsize=12)

# Hide the x tick labels on the top panel; the bottom panel shares the axis.
ax = plt.gca()
ax.set_xticklabels([])

plt.subplot(212)
plt.plot(history['val_acc'])
plt.plot(history['acc'])
plt.yticks([0.7, 0.8, 0.9, 1.0])
plt.title('Accuracy', fontsize=12)
plt.show()
In [43]:
# Indices of test samples the model got wrong (prediction != ground truth).
wrong_ans_index = [i for i in range(len(predout)) if predout[i] != testout[i]]
In [44]:
wrong_ans_index = list(set(wrong_ans_index))
In [ ]:
# Display every misclassified test image with its predicted vs. true label.
# Uses the original (pre-normalisation) test images for viewing;
# swap in flowers/fungus/rocks arrays to inspect a specific subset.
dataset = tsDatOrg

for index in wrong_ans_index:
    print("Showing %s index image" %(index))
    print("Predicted as %s but is actually %s" %(predout[index], testout[index]))
    # Fixed: originally `data[index]`, an undefined name that only worked
    # because of leftover kernel state — it must index the chosen dataset.
    imgplot = plt.imshow(dataset[index])
    plt.show()
Showing 2051 index image
Predicted as 0 but is actually 1
Showing 4 index image
Predicted as 0 but is actually 1
Showing 2053 index image
Predicted as 0 but is actually 1
Showing 3588 index image
Predicted as 1 but is actually 0
Showing 3079 index image
Predicted as 1 but is actually 0
Showing 3080 index image
Predicted as 1 but is actually 0
Showing 536 index image
Predicted as 0 but is actually 1
Showing 33 index image
Predicted as 0 but is actually 1
Showing 37 index image
Predicted as 0 but is actually 1
Showing 1065 index image
Predicted as 0 but is actually 1
Showing 2610 index image
Predicted as 1 but is actually 0
Showing 564 index image
Predicted as 0 but is actually 1
Showing 565 index image
Predicted as 0 but is actually 1
Showing 1079 index image
Predicted as 0 but is actually 1
Showing 1594 index image
Predicted as 0 but is actually 1
Showing 571 index image
Predicted as 0 but is actually 1
Showing 2111 index image
Predicted as 0 but is actually 1
Showing 3136 index image
Predicted as 1 but is actually 0
Showing 1603 index image
Predicted as 0 but is actually 1
Showing 3141 index image
Predicted as 1 but is actually 0
Showing 70 index image
Predicted as 0 but is actually 1
Showing 2632 index image
Predicted as 1 but is actually 0
Showing 3656 index image
Predicted as 1 but is actually 0
Showing 75 index image
Predicted as 0 but is actually 1
Showing 3068 index image
Predicted as 1 but is actually 0
Showing 1616 index image
Predicted as 0 but is actually 1
Showing 1618 index image
Predicted as 0 but is actually 1
Showing 1619 index image
Predicted as 0 but is actually 1
Showing 3155 index image
Predicted as 1 but is actually 0
Showing 597 index image
Predicted as 0 but is actually 1
Showing 1109 index image
Predicted as 0 but is actually 1
Showing 2134 index image
Predicted as 0 but is actually 1
Showing 88 index image
Predicted as 0 but is actually 1
Showing 89 index image
Predicted as 0 but is actually 1
Showing 601 index image
Predicted as 0 but is actually 1
Showing 2648 index image
Predicted as 1 but is actually 0
Showing 2651 index image
Predicted as 1 but is actually 0
Showing 3668 index image
Predicted as 1 but is actually 0
Showing 2656 index image
Predicted as 1 but is actually 0
Showing 3680 index image
Predicted as 1 but is actually 0
Showing 3688 index image
Predicted as 1 but is actually 0
Showing 1642 index image
Predicted as 0 but is actually 1
Showing 621 index image
Predicted as 0 but is actually 1
Showing 3183 index image
Predicted as 1 but is actually 0
Showing 628 index image
Predicted as 0 but is actually 1
Showing 631 index image
Predicted as 0 but is actually 1
Showing 636 index image
Predicted as 0 but is actually 1
Showing 3196 index image
Predicted as 1 but is actually 0
Showing 2176 index image
Predicted as 0 but is actually 1
Showing 642 index image
Predicted as 0 but is actually 1
Showing 3203 index image
Predicted as 1 but is actually 0
Showing 2181 index image
Predicted as 0 but is actually 1
Showing 647 index image
Predicted as 0 but is actually 1
Showing 137 index image
Predicted as 0 but is actually 1
Showing 138 index image
Predicted as 0 but is actually 1
Showing 2189 index image
Predicted as 0 but is actually 1
Showing 656 index image
Predicted as 0 but is actually 1
Showing 146 index image
Predicted as 0 but is actually 1
Showing 2195 index image
Predicted as 0 but is actually 1
Showing 1175 index image
Predicted as 0 but is actually 1
Showing 1688 index image
Predicted as 0 but is actually 1
Showing 2199 index image
Predicted as 0 but is actually 1
Showing 2713 index image
Predicted as 1 but is actually 0
Showing 3224 index image
Predicted as 1 but is actually 0
In [ ]:
# Stacking 3 NNs?